
InvalidArgumentError: Matrix size-incompatible: In[0]: [256,3], In[1]: [65,1] [[{{node dense_51/BiasAdd}}]]

How do I fix InvalidArgumentError: Matrix size-incompatible: In[0]: [256,3], In[1]: [65,1] [[{{node dense_51/BiasAdd}}]]?

As I am new to this, please help me solve it. I have been struggling with this for months. Please overlook any minor mistakes and focus on the issue. Thanks in advance. Ever since I got this code I have been reshaping tensors all the way down to the bottom of the script, but I still get the error quoted in the title. If you need any of the HDF5 files, let me know your email ID.
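For context, here is a tiny standalone sketch (not my real model, just the shapes copied from the error message) showing how TensorFlow raises this kind of "Matrix size-incompatible" error when the inner dimensions of a matrix multiplication disagree:

    # Standalone sketch, NOT my model: it only reproduces the same class of error,
    # using the shapes from the message ([256,3] activations vs a [65,1] kernel).
    # Runs eagerly under plain TF2 (no disable_v2_behavior here).
    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.ones((256, 3), dtype=np.float32))   # what the layer receives: 3 features
    w = tf.constant(np.ones((65, 1), dtype=np.float32))    # a kernel built for 65 input features
    try:
        tf.matmul(x, w)                                     # inner dims 3 vs 65 do not match
    except tf.errors.InvalidArgumentError as e:
        print(e.message)  # Matrix size-incompatible: In[0]: [256,3], In[1]: [65,1]

My full code is below: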

    from __future__ import print_function,division
    import tensorflow as tf
    tf.compat.v1.disable_v2_behavior()
    import numpy as np
    import h5py
    import scipy.io
    import random
    import sys,os
    import itertools
    import numbers
    from collections import Counter
    from warnings import warn
    from abc import ABCMeta,abstractmethod
    from tensorflow import keras,reshape
    np.random.seed(1337)  
    import keras
    from keras.optimizers import RMSprop,SGD
    from keras.models import Sequential,model_from_yaml
    from keras.layers.core import Dense,Dropout,Activation,Flatten
    import keras.layers.core as core
    from tensorflow.keras.layers import Dense,Embedding,LSTM,Input,multiply,Reshape
    from keras.layers.convolutional import Convolution1D,MaxPooling1D
    from keras.layers.wrappers import Bidirectional
    from keras.constraints import maxnorm
    from keras.layers.recurrent import LSTM,GRU
    from keras.callbacks import ModelCheckpoint,EarlyStopping
    from keras.layers import Embedding
    from sklearn.metrics import fbeta_score,roc_curve,auc,roc_auc_score,average_precision_score
    import matplotlib.pyplot as plt
    from keras.regularizers import l2,l1,l1_l2
   
    from tensorflow.keras.models import Model 
    import tensorflow.keras.backend as K
    from keras.engine.topology import Layer
    from keras import activations,initializers,regularizers,constraints
    from keras.layers import Input
    from keras.layers import ActivityRegularization
    tf.compat.v1.disable_v2_behavior()
   
    class Attention(tf.keras.layers.Layer):
        def __init__(self,hidden,init='glorot_uniform',activation='linear',W_regularizer=None,b_regularizer=None,W_constraint=None,**kwargs):
          self.init = initializers.get(init)
          self.activation = activations.get(activation)
          self.W_regularizer = regularizers.get(W_regularizer)
          self.b_regularizer = regularizers.get(b_regularizer)
          self.W_constraint = constraints.get(W_constraint)
          self.hidden=hidden
          super(Attention,self).__init__(**kwargs)
        def build(self,input_shape):
          input_dim = input_shape[-1]
          self.input_length = input_shape[1]
          self.W0 = self.add_weight(name ='{}_W1'.format(self.name),shape = (input_dim,self.hidden),initializer = 'glorot_uniform',trainable=True) # Keras 2 API
          self.W  = self.add_weight( name ='{}_W'.format(self.name),shape = (self.hidden,1),trainable=True)
          self.b0 = K.zeros((self.hidden,),name='{}_b0'.format(self.name))
          self.b  = K.zeros((1,),name='{}_b'.format(self.name))
          # self.trainable_weights = [self.W0,self.W,self.b,self.b0]
          self.regularizers =[]
          if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
          if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
          self.constraints = {}
          if self.W_constraint:
            self.constraints[self.W0] = self.W_constraint
            self.constraints[self.W] = self.W_constraint
          super(Attention,self).build(input_shape)
        def call(self,x,mask=None):
          attmap = self.activation(K.dot(x,self.W0)+self.b0)
          print("self.b.shape=",self.b.shape)
          attmap = K.dot(attmap,self.W) + self.b
          print("attmap.shape=",attmap.shape)
          #till Now it was for attention fully connected network/dot product
          attmap = K.reshape(attmap,(-1,self.input_length)) 
          attmap = K.softmax(attmap)
          print("attmap.shape1=",attmap.shape)
          
          print("x.shape1=",x.shape)
          dense_representation = K.batch_dot(attmap,x,axes=(1,1))
          print("dense_representation.shape=",dense_representation.shape)
         
          
          out = K.concatenate([dense_representation,attmap],axis=1) 
          print("out.shape=",out.shape)
          return out
    
        def compute_output_shape(self,input_shape):
            return (input_shape[0],input_shape[-1] + input_shape[1])
    
        def get_config(self):
            config = {'init': 'glorot_uniform','activation': self.activation.__name__,'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,'hidden': self.hidden if self.hidden else None}
            base_config = super(Attention,self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    
    class attention_flatten(tf.keras.layers.Layer): 
        def __init__(self,keep_dim,**kwargs):
            self.keep_dim = keep_dim
            super(attention_flatten,self).__init__(**kwargs)
        def compute_output_shape(self,input_shape):
            if not all(input_shape[1:]):
                raise Exception('The shape of the input to "Flatten" '
                                'is not fully defined '
                                '(got ' + str(input_shape[1:]) + '. '
                                'Make sure to pass a complete "input_shape" '
                                'or "batch_input_shape" argument to the first '
                                'layer in your model.')
            return (input_shape[0],self.keep_dim)   
        def call(self,x,mask=None):
            x = x[1:,:self.keep_dim]
            return (x)
    
    def set_up_model_up():
          print('building model')
          seq_input_shape = (2000,4,)
          nb_filter = 64
          filter_length = 6
          input_shape = (2000,)
          attentionhidden = 256
          seq_input = Input(shape = seq_input_shape,name = 'seq_input')
          convul1   = tf.keras.layers.Convolution1D(filters = nb_filter,kernel_size = filter_length,padding = 'valid',activation = 'relu',kernel_constraint = maxnorm(3),)
          # modil = Sequential()                      
          pool_ma1 = keras.layers.MaxPooling1D(pool_size = 3)
          dropout1 = Dropout(0.5977908689086315)
          dropout2 = Dropout(0.30131233477637737)
          decoder  = Attention(hidden = attentionhidden,activation = 'linear')
          dense1   = tf.keras.layers.Dense(1)
          dense2   = tf.keras.layers.Dense(256)
         
          output_1 = pool_ma1(convul1(seq_input))
          output_2 = (output_1)
          output_6=tf.reshape(output_2,[1,1995,64])
          print("output_2's'shap[2]e=",output_2.shape[2])
          print("output_2's'shape=",output_2.shape)
          att_decoder  = decoder(output_6) 
          # tf.slice(output_6,begin,size,name=None)
    
          output_3 = attention_flatten(3)(att_decoder)
          output_2 = tf.reshape(output_2,[3,42560])

          output_4 = dense1(output_2)
          
          output_4=tf.keras.layers.Flatten()(output_4)
          print("output_3p.shape=",output_3.shape)
          print("output_4p.shape=",output_4.shape)
          all_outp = K.concatenate([output_3,output_4],axis=0)
          print(all_outp)
          print("all_outp.shape",all_outp.shape)
          output_5 =  dense2(all_outp)
          tf.keras.layers.Add()
          output_f =  tf.keras.layers.Activation('sigmoid')(output_5)
          output_c=tf.keras.layers.Add()(output_f)
          modil=tf.keras.models.Model(inputs = seq_input,outputs = output_f)
          modil.build(input_shape=input_shape)
          modil.compile(loss = 'binary_crossentropy',optimizer = 'nadam',metrics = ['accuracy'])
          
          print (modil.summary())
          print("len(modil number of layers)",len(modil.layers))
          return modil
          
    def test(n_estimators = 16):
            model = set_up_model_up()
            model.save_weights('Sequential_model_weights.h5')
            model.load_weights('Sequential_model_weights.h5',by_name=True)
          
            X_test = np.load('/content/drive/MyDrive/X_test.npy',mmap_mode='r')
            y_test = np.load('/content/drive/MyDrive/y_test.npy',mmap_mode='r')
            ensemble = np.zeros(len(X_test))
            for i in range(n_estimators):
               print ('testing',i,'model')
               print ('model shape is',model.summary)
               model.load_weights('/content/drive/MyDrive/model/bestmodel_split_chr_GD_'+ str(i) + '.hdf5')
               print ('model shape after loading is',model.summary)
             
               print ('Predicting...')
              
               print ('testing',X_test.shape)
               print (len(model.layers))
               #  y_score = model.predict(np.expand_dims(np.array(X_test,dtype=np.float32),0),verbose = 1,batch_size = 256)
               formatmul= np.empty((3,2000,4),dtype=object)
               for x in range(0,2):
                 for y in range(0,1999):
                   for z in range(0,3):
                     formatmul[x][y][z]=X_test[x][y][z]
               #  y_score = model.predict(X_test).reshape(665,-1),batch_size = 256)
               print("model.output_shape",model.output_shape)
               print("model.input_shape",model.input_shape)
               #  y_score = model.predict(formatmul,batch_size=42560)
               y_score = model.predict(np.array(formatmul),batch_size = 64)
               y_pred = []
               for item in y_score:
                  y_pred.append(item[0])
               y_pred = np.array(y_pred)
               ensemble += y_pred
            ensemble /= n_estimators
            np.save('/content/drive/MyDrive/test_result/y_test',y_test)
            np.save('/content/drive/MyDrive/test_result/y_pred',ensemble)
            auroc = roc_auc_score(y_test,ensemble)
            aupr  = average_precision_score(y_test,ensemble)
            print ('auroc',auroc)
            print ('aupr',aupr)
    
    test(n_estimators = 16)

Stack trace:

WARNING:tensorflow:From /usr/local/lib/python3.7/dist-packages/tensorflow/python/compat/v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
building model
output_2's'shap[2]e= 64
output_2's'shape= (?,665,64)
self.b.shape= (1,)
attmap.shape= (1,1)
attmap.shape1= (1,1995)
x.shape1= (1,64)
dense_representation.shape= (1,64)
out.shape= (1,2059)
output_3p.shape= (0,3)
output_4p.shape= (1,3)
Tensor("concat:0",shape=(1,3),dtype=float32)
all_outp.shape (1,3)
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
conv1d (Conv1D)                 (None,64)     1600        seq_input[0][0]                  
__________________________________________________________________________________________________
tf_op_layer_Reshape (TensorFlow [(1,64)]      0           max_pooling1d[0][0]              
__________________________________________________________________________________________________
tf_op_layer_Reshape_1 (TensorFl [(1,42560)]      0           max_pooling1d[0][0]              
__________________________________________________________________________________________________
attention (Attention)           (1,2059)            16897       tf_op_layer_Reshape[0][0]        
__________________________________________________________________________________________________
dense (Dense)                   (1,1)            42561       tf_op_layer_Reshape_1[0][0]      
__________________________________________________________________________________________________
attention_flatten (attention_fl (0,3)               0           attention[0][0]                  
__________________________________________________________________________________________________
flatten (Flatten)               (1,3)               0           dense[0][0]                      
__________________________________________________________________________________________________
tf_op_layer_concat (TensorFlowO [(1,3)]             0           attention_flatten[0][0]          
                                                                 flatten[0][0]                    
__________________________________________________________________________________________________
dense_1 (Dense)                 (1,256)             1024        tf_op_layer_concat[0][0]         
__________________________________________________________________________________________________
activation (Activation)         (1,256)             0           dense_1[0][0]                    
==================================================================================================
Total params: 62,082
Trainable params: 62,082
Non-trainable params: 0
__________________________________________________________________________________________________
None
len(modil number of layers) 10
testing 0 model
model shape is <bound method Model.summary of <tensorflow.python.keras.engine.functional.Functional object at 0x7f5faf959350>>
model shape after loading is <bound method Model.summary of <tensorflow.python.keras.engine.functional.Functional object at 0x7f5faf959350>>
Predicting...
testing (172832,4)
10
model.output_shape (1,256)
model.input_shape (None,4)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:2426: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0,as `updates` are applied automatically.
  warnings.warn('`Model.state_updates` will be removed in a future version. '
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-1-b9a05eeb9fa1> in <module>()
    240 # if __name__ == '__main__':
    241 # set_up_model_up()
--> 242 test(n_estimators = 16)

5 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/client/session.py in __call__(self,*args,**kwargs)
   1480         ret = tf_session.TF_SessionRunCallable(self._session._session,
   1481                                                self._handle,args,
-> 1482                                                run_metadata_ptr)
   1483         if run_metadata:
   1484           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
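For what it's worth, this is roughly how I have been checking the shapes before calling predict (just a debugging sketch, not a fix; model and formatmul are the names from my test() function above):

    # Debugging sketch only: compare what the model was built for with what is
    # actually handed to predict().
    import numpy as np

    def check_shapes(model, batch):
        batch = np.asarray(batch)
        print("model.input_shape :", model.input_shape)   # (None, 4) in my run
        print("batch.shape       :", batch.shape)
        # Everything after the batch dimension has to line up, otherwise the
        # MatMul/BiasAdd inside a Dense layer ends up with incompatible matrices.
        if tuple(model.input_shape[1:]) != tuple(batch.shape[1:]):
            print("mismatch: model expects", model.input_shape[1:],
                  "per sample but the batch provides", batch.shape[1:])

    # e.g. check_shapes(model, formatmul) right before model.predict(...)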
