You are trying to load a weight file containing 36 layers into a model with 19 layers

How to fix "You are trying to load a weight file containing 36 layers into a model with 19 layers"

import numpy as np
import keras
from keras.models import Model
from keras.layers import Input,Conv2D,MaxPooling2D,UpSampling2D,concatenate,Cropping2D,ZeroPadding2D,Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint,LearningRateScheduler,TensorBoard
from keras import backend as K
import os
from skimage.transform import resize
from skimage.io import imsave

K.set_image_data_format('channels_first')  # Theano dimension ordering in this code

working_path = '../luna/output/'
main_path = '../luna/'
unet_weight = '../luna/unet.hdf5'



BATCH_SIZE=8
EPOCHS=60
img_rows = 512
img_cols = 512

smooth = 1.


def dice_coef(y_true,y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)


def dice_coef_np(y_true,y_pred):
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)


def dice_coef_loss(y_true,y_pred):
    return 1 - dice_coef(y_true,y_pred)
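
# Worked example (hypothetical values) for dice_coef_np: with
# y_true = np.array([1.,1.,0.]) and y_pred = np.array([1.,0.,0.]),
# the intersection is 1, so the smoothed score is
# (2*1 + 1) / (2 + 1 + 1) = 0.75. The smooth = 1. term keeps the
# ratio defined when both masks are empty.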


def get_model():

    inputs = Input((1,img_rows,img_cols))

    conv1 = Conv2D(32,(3,3),activation='relu',padding='same')(inputs)
    conv1 = Conv2D(32,(3,3),activation='relu',padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2,2))(conv1)

    conv2 = Conv2D(64,(3,3),activation='relu',padding='same')(pool1)
    conv2 = Conv2D(64,(3,3),activation='relu',padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2,2))(conv2)

    conv3 = Conv2D(128,(3,3),activation='relu',padding='same')(pool2)
    conv3 = Conv2D(128,(3,3),activation='relu',padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2,2))(conv3)

    conv4 = Conv2D(256,(3,3),activation='relu',padding='same')(pool3)
    conv4 = Conv2D(256,(3,3),activation='relu',padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2,2))(conv4)

    conv5 = Conv2D(512,(3,3),activation='relu',padding='same')(pool4)
    conv5 = Conv2D(512,(3,3),activation='relu',padding='same')(conv5)

    up6 = concatenate([UpSampling2D(size=(2,2))(conv5),conv4],axis=1)
    conv6 = Conv2D(256,(3,3),activation='relu',padding='same')(up6)
    conv6 = Conv2D(256,(3,3),activation='relu',padding='same')(conv6)

    up7 = concatenate([UpSampling2D(size=(2,2))(conv6),conv3],axis=1)
    conv7 = Conv2D(128,(3,3),activation='relu',padding='same')(up7)
    conv7 = Conv2D(128,(3,3),activation='relu',padding='same')(conv7)

    up8 = concatenate([UpSampling2D(size=(2,2))(conv7),conv2],axis=1)
    conv8 = Conv2D(64,(3,3),activation='relu',padding='same')(up8)
    conv8 = Conv2D(64,(3,3),activation='relu',padding='same')(conv8)

    up9 = concatenate([UpSampling2D(size=(2,2))(conv8),conv1],axis=1)
    conv9 = Conv2D(32,(3,3),activation='relu',padding='same')(up9)
    conv9 = Conv2D(32,(3,3),activation='relu',padding='same')(conv9)
    conv10 = Conv2D(1,(1,1),activation='sigmoid')(conv9)

    model = Model(inputs=inputs,outputs=conv10)
    model.summary()
#     model.compile(optimizer=Adam(lr=1.0e-5),loss=dice_coef_loss,metrics=[dice_coef])
    model.compile(optimizer=Adam(lr=1e-5),loss='binary_crossentropy',metrics = ['accuracy'])
    return model


def get_unet():
    K.set_image_data_format('channels_first')  # Theano dimension ordering in this code
    inputs = Input((1,img_rows,img_cols))

    conv1 = Conv2D(32,(3,3),activation='relu',padding='same')(inputs)
    conv1 = Conv2D(32,(3,3),activation='relu',padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2,2))(conv1)

    conv2 = Conv2D(64,(3,3),activation='relu',padding='same')(pool1)
    conv2 = Conv2D(64,(3,3),activation='relu',padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2,2))(conv2)

    conv3 = Conv2D(128,(3,3),activation='relu',padding='same')(pool2)
    conv3 = Conv2D(128,(3,3),activation='relu',padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2,2))(conv3)

    conv4 = Conv2D(256,(3,3),activation='relu',padding='same')(pool3)
    conv4 = Conv2D(256,(3,3),activation='relu',padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2,2))(conv4)

    conv5 = Conv2D(512,(3,3),activation='relu',padding='same')(pool4)
    conv5 = Conv2D(512,(3,3),activation='relu',padding='same')(conv5)

    up6 = concatenate([Conv2DTranspose(256,(2,2),strides=(2,2),padding='same')(conv5),conv4],axis=1)
    conv6 = Conv2D(256,(3,3),activation='relu',padding='same')(up6)
    conv6 = Conv2D(256,(3,3),activation='relu',padding='same')(conv6)

    up7 = concatenate([Conv2DTranspose(128,(2,2),strides=(2,2),padding='same')(conv6),conv3],axis=1)
    conv7 = Conv2D(128,(3,3),activation='relu',padding='same')(up7)
    conv7 = Conv2D(128,(3,3),activation='relu',padding='same')(conv7)

    up8 = concatenate([Conv2DTranspose(64,(2,2),strides=(2,2),padding='same')(conv7),conv2],axis=1)
    conv8 = Conv2D(64,(3,3),activation='relu',padding='same')(up8)
    conv8 = Conv2D(64,(3,3),activation='relu',padding='same')(conv8)

    up9 = concatenate([Conv2DTranspose(32,(2,2),strides=(2,2),padding='same')(conv8),conv1],axis=1)
    conv9 = Conv2D(32,(3,3),activation='relu',padding='same')(up9)
    conv9 = Conv2D(32,(3,3),activation='relu',padding='same')(conv9)
    conv10 = Conv2D(1,(1,1),activation='sigmoid')(conv9)

    model = Model(inputs=[inputs],outputs=[conv10])
    model.summary()
    model.compile(optimizer=Adam(lr=1e-5),loss=dice_coef_loss,metrics=[dice_coef])

    return model
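
# Note: get_model() (UpSampling2D decoder) and get_unet() (Conv2DTranspose
# decoder) build different layer lists, so an HDF5 weight file saved from
# one variant cannot be loaded directly into the other; that is a likely
# source of the "36 layers into a model with 19 layers" error discussed below.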



def train_and_predict(use_existing):
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print ('BATCH_SIZE : {}'.format(BATCH_SIZE))
    print ('EPOCHS : {}'.format(EPOCHS))
    imgs_train = np.load(main_path + "trainImages.npy").astype(np.float32)
    imgs_mask_train = np.load(main_path + "trainMasks.npy").astype(np.float32)

    imgs_test = np.load(main_path + "testImages.npy").astype(np.float32)
    imgs_mask_test_true = np.load(main_path + "testMasks.npy").astype(np.float32)

    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean  # images should already be standardized, but just in case
    imgs_train /= std

    imgs_test -= mean  # apply the same training-set statistics to the test set
    imgs_test /= std

    print('-' * 30)
    print('Creating and compiling model...')

    model = get_model()
    # Saving the best weights seen so far at checkpoints

    best_weight_path = '../model/best_unet_upsampling_{}.hdf5'.format(BATCH_SIZE)
    model_checkpoint = ModelCheckpoint(best_weight_path,monitor='val_loss',save_best_only=True)
    tb = TensorBoard(log_dir="../logs_upsampling",batch_size=BATCH_SIZE)

    # Set argument for call to train_and_predict to true at end of script
    if use_existing:
        print('loading weights...')
        model.load_weights(unet_weight)

    print('-' * 30)
    print('Fitting model...')

    model.fit(imgs_train,imgs_mask_train,validation_split=0.15,batch_size=BATCH_SIZE,epochs=EPOCHS,verbose=1,shuffle=True,callbacks=[model_checkpoint,tb])


    # loading best weights from training session
    print('-' * 30)
    print('Loading saved weights...')


    model.load_weights(best_weight_path)

    print('-' * 30)
    print('Predicting masks on test data...')

    num_test = len(imgs_test)
    imgs_mask_test = np.ndarray([num_test,1,512,512],dtype=np.float32)
    for i in range(num_test):
        imgs_mask_test[i] = model.predict([imgs_test[i:i + 1]],verbose=0)[0]

    np.save('../masksTestPredictedAll.npy',imgs_mask_test)
    
    print('-' * 30)
    print('Calculate mean dice coeff...')
    
    mean_dice = 0.0
    for i in range(num_test):
        mean_dice += dice_coef_np(imgs_mask_test_true[i,0],imgs_mask_test[i,0])
    mean_dice /= num_test

    print("Mean Dice Coeff : ",mean_dice)

if __name__ == '__main__':
    # use_existing=True makes the script load unet_weight before training;
    # this is the call that raises the 36-vs-19-layer error when the weight
    # file was saved from a different architecture than get_model() builds.
    train_and_predict(True)

I am trying to implement a project from GitHub, but running it produces this error:

You are trying to load a weight file containing 36 layers into a model with 19 layers

How can I fix this error? Some answers suggest setting an input shape on the input layer, but that raises an error too. Others suggest using Keras version 2.0.0; that does not solve the problem either.
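
One way to pin down where the mismatch comes from is to compare the layers your model actually builds with the layer names stored in the weight file. The sketch below is a minimal diagnostic, assuming the weights are a Keras-saved HDF5 file like the unet.hdf5 used above; Keras writes a layer_names attribute into such files, which h5py can read. The by_name=True load at the end is only a workaround and only helps where layer names overlap:

import h5py

# Build the model exactly as the script does and count its layers.
model = get_model()
print('model layers:', len(model.layers))

# Keras stores the saved layers' names in the 'layer_names' attribute
# of the HDF5 root group.
with h5py.File(unet_weight, 'r') as f:
    saved = [name.decode('utf8') for name in f.attrs['layer_names']]
print('weight file layers:', len(saved))
print(saved)

# If the counts differ (here: 36 vs 19), the file was saved from a different
# architecture, e.g. a deeper U-Net. Either rebuild that exact architecture
# before loading, or load only the layers whose names match:
model.load_weights(unet_weight, by_name=True)

If the file really was saved from a 36-layer network, the clean fix is to reconstruct that exact architecture (or re-export the weights from it) rather than forcing them into this 19-layer model.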
