ValueError: Data cardinality is ambiguous: x sizes: 1, 500, 500. Please provide data which shares the same first dimension

How do I solve "ValueError: Data cardinality is ambiguous: x sizes: 1, 500, 500. Please provide data which shares the same first dimension"?

My seq2seq model fits the data just fine, but when I run the inference model, initialized with the encoder states, by passing in the start token, the inference model's predict method throws the error above.

As far as I understand, the encoder states have dimensions (1, 500, 300) and the token being passed in is (1, 1, 300). This is my first ever seq2seq model; I have searched the internet for similar issues and compared my model with the seq2seq model in the official Keras documentation.
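A quick way to see the cardinality Keras is complaining about is to print the first (batch) dimension of every array just before each predict call. A minimal sketch, reusing names from the code below (padded_X_val, infencoder_model, tar_seq); the prints and the np.asarray wrapping are mine, not part of the question's code:

import numpy as np

inp_seq=padded_X_val[2]
print('encoder input shape:',np.asarray(inp_seq).shape)   # likely (500,): a 1-D padded row, no batch axis

enc_h,enc_c=infencoder_model.predict(inp_seq)
tar_seq=np.zeros((1,1))

# predict() requires every input to share the same first dimension
for name,arr in [('tar_seq',tar_seq),('enc_h',enc_h),('enc_c',enc_c)]:
    print(name,'first dim:',np.asarray(arr).shape[0])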

Code

from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, TimeDistributed
from tensorflow.keras.models import Model
import numpy as np

encoder_inputs=Input(shape=(max_art_len,))
encoder_emb=Embedding(art_vocab_size,100,trainable=True,name='Encoder_Embedding_layer')(encoder_inputs)
encoder_lstm1=LSTM(300,return_sequences=True,return_state=True,name='Encoder_LSTM1')
enclstm1_outputs,enclstm1_h,enclstm1_c=encoder_lstm1(encoder_emb)


decoder_inputs=Input(shape=(None,))
decoder_em=Embedding(sum_vocab_size,100,name='Decoder_Embedding_layer')   # output_dim of 100 matches the encoder embedding
decoder_emb=decoder_em(decoder_inputs)

# return_sequences/return_state are needed here: three outputs are unpacked
# below, and TimeDistributed expects a full sequence
decoder_lstm1=LSTM(300,return_sequences=True,return_state=True,name='Decoder_LSTM1')
declstm1_output,declstm1_h,declstm1_c=decoder_lstm1(decoder_emb,initial_state=[enclstm1_h,enclstm1_c])

output_layer=TimeDistributed(Dense(sum_vocab_size,activation='softmax',name='softmax'))
output=output_layer(declstm1_output)

model=Model([encoder_inputs,decoder_inputs],output)

model.summary()


infencoder_model=Model(inputs=[encoder_inputs],outputs=[enclstm1_h,enclstm1_c])

infdecoder_model_state_input_h=Input(shape=(300,),name='infdec_I1')
infdecoder_model_state_input_c=Input(shape=(300,),name='infdec_I2')

infdeclstm1_output,infdec_h,infdec_c=decoder_lstm1(decoder_emb,initial_state=[infdecoder_model_state_input_h,infdecoder_model_state_input_c])

infdec_output=output_layer(infdeclstm1_output)                         

infdecoder_model=Model(inputs=[decoder_inputs]+[infdecoder_model_state_input_h,infdecoder_model_state_input_c],outputs=[infdec_output]+[infdec_h,infdec_c])



def decode_sequence(inp_seq):
    
    enc_h,enc_c=infencoder_model.predict(inp_seq)
    
    tar_seq=np.zeros((1,1))
    tar_seq[0,0]=sum_wordindex['start']
    
    stop_loop=False
    decoded_string=''
    
    while not stop_loop:
        
        dec_out,dec_h,dec_c=infdecoder_model.predict([tar_seq]+[enc_h,enc_c])
        
        tar_token_index=np.argmax(dec_out[0,-1,:])
        tar_token_word=reverse_sum_index[tar_token_index]   # index-to-word lookup; sum_wordindex maps word->index
        
        if tar_token_word =='end' or len(decoded_string)>=max_art_len:
            
            stop_loop=True
        else:
            decoded_string+=tar_token_word+' '
            
            tar_seq=np.zeros((1,1))
            tar_seq[0,0]=tar_token_index
            
            
            enc_h=dec_h
            enc_c=dec_c
            
    return decoded_string

def seq2art(inp_seq):
    
    art=''
    
    for i in range(len(inp_seq)):
        
        if inp_seq[i]==0:
            break
        art+=reverse_art_index[inp_seq[i]]+' '
        
    return art


def seq2sum(inp_seq):
    
    summary=''
    
    for i in range(len(inp_seq)):
        
        if inp_seq[i]==0:
            break
        word=reverse_sum_index[inp_seq[i]]
        summary+=word+' '
            
    return summary

print('Example Article : '+'\n',seq2art(padded_X_val[2]))
print('Example Summary : '+'\n',seq2sum(padded_Y_val[2]))
print('Predicted Summary : '+'\n',decode_sequence(padded_X_val[2]))

Error

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-43-9ef680a87bdf> in <module>
      1 print('Example Articel : '+'\n',seq2art(padded_X_val[2]))
      2 print('Example Summary : '+'\n',seq2sum(padded_Y_val[2]))
----> 3 print('Predicted Summary : '+'\n',decode_sequence(padded_X_val[2]))

<ipython-input-41-c41e0caee91d> in decode_sequence(inp_seq)
     11     while not stop_loop:
     12 
---> 13         dec_out,dec_c=infdecoder_model.predict([tar_seq]+[enc_h,enc_c])
     14 
     15         tar_token_index=np.argmax(dec_out[0,:])

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self,*args,**kwargs)
    128       raise ValueError('{} is not supported in multi-worker mode.'.format(
    129           method.__name__))
--> 130     return method(self,**kwargs)
    131 
    132   return tf_decorator.make_decorator(

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in predict(self,x,batch_size,verbose,steps,callbacks,max_queue_size,workers,use_multiprocessing)
   1577           use_multiprocessing=use_multiprocessing,
   1578           model=self,
-> 1579           steps_per_execution=self._steps_per_execution)
   1580 
   1581       # Container that configures and calls `tf.keras.Callback`s.

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self,y,sample_weight,steps_per_epoch,initial_epoch,epochs,shuffle,class_weight,use_multiprocessing,model,steps_per_execution)
   1115         use_multiprocessing=use_multiprocessing,
   1116         distribution_strategy=ds_context.get_strategy(),
-> 1117         model=model)
   1118 
   1119     strategy = ds_context.get_strategy()

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self,sample_weights,sample_weight_modes,**kwargs)
    280             label,",".join(str(i.shape[0]) for i in nest.flatten(data)))
    281       msg += "Please provide data which shares the same first dimension."
--> 282       raise ValueError(msg)
    283     num_samples = num_samples.pop()
    284 

ValueError: Data cardinality is ambiguous:
  x sizes: 1, 500, 500
Please provide data which shares the same first dimension.
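For what it's worth, the 1 vs. 500 in the "x sizes" line matches what happens when padded_X_val[2] is fed in as a 1-D row of shape (500,): predict treats those 500 entries as 500 separate samples, so the encoder states come back with a first dimension of 500 while tar_seq has a first dimension of 1. A hedged sketch of the usual workaround, giving the sequence an explicit batch axis at the top of decode_sequence (the reshape is an assumption, not code from the question):

import numpy as np

# at the top of decode_sequence, before the encoder call:
inp_seq=np.asarray(inp_seq).reshape(1,-1)   # (500,) -> (1,500): one sample of 500 tokens
enc_h,enc_c=infencoder_model.predict(inp_seq)
# enc_h and enc_c now have first dimension 1, matching tar_seq of shape (1,1)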
