"TypeError: unhashable type: 'Dimension'" on the input to BatchNormalization(axis=CHANNEL_AXIS)

How can I fix "TypeError: unhashable type: 'Dimension'" when an input is passed to BatchNormalization(axis=CHANNEL_AXIS)?

I have the following ResNet 3D architecture, taken from GitHub. It is a Keras implementation of R3D, intended for training a video classification model.

## resnet 3D architecture
# Taken from https://github.com/JihongJu/keras-resnet3d/blob/master/resnet3d/resnet3d.py

def _bn_relu(input):
    """Helper to build a BN -> relu block (by @raghakot)."""
    norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
    return Activation("relu")(norm)

def _conv_bn_relu3D(**conv_params):
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides",(1,1,1))
    kernel_initializer = conv_params.setdefault(
        "kernel_initializer","he_normal")
    padding = conv_params.setdefault("padding","same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer",l2(1e-4))

    def f(input):
        conv = Conv3D(filters=filters,kernel_size=kernel_size,strides=strides,kernel_initializer=kernel_initializer,padding=padding,kernel_regularizer=kernel_regularizer)(input)
        return _bn_relu(conv)

    return f

def _bn_relu_conv3d(**conv_params):
    """Helper to build a BN -> relu -> conv3d block."""
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides",(1,1,1))
    kernel_initializer = conv_params.setdefault("kernel_initializer","he_normal")
    padding = conv_params.setdefault("padding","same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer",l2(1e-4))

    def f(input):
        activation = _bn_relu(input)
        return Conv3D(filters=filters,kernel_size=kernel_size,strides=strides,kernel_initializer=kernel_initializer,padding=padding,kernel_regularizer=kernel_regularizer)(activation)
    return f


def _shortcut3d(input,residual):
    """3D shortcut to match input and residual and merges them with "sum"."""
    stride_dim1 = math.ceil(int(input.shape[DIM1_AXIS]) \
        / int(residual.shape[DIM1_AXIS]))
    stride_dim2 = math.ceil(int(input.shape[DIM2_AXIS]) \
        / int(residual.shape[DIM2_AXIS]))
    stride_dim3 = math.ceil(int(input.shape[DIM3_AXIS]) \
        / int(residual.shape[DIM3_AXIS]))
    equal_channels = int(residual.shape[CHANNEL_AXIS]) \
        == int(input.shape[CHANNEL_AXIS])

    shortcut = input
    if stride_dim1 > 1 or stride_dim2 > 1 or stride_dim3 > 1 \
            or not equal_channels:
        shortcut = Conv3D(
            filters=residual.shape[CHANNEL_AXIS],kernel_size=(1,1,1),strides=(stride_dim1,stride_dim2,stride_dim3),kernel_initializer="he_normal",padding="valid",kernel_regularizer=l2(1e-4)
            )(input)
    return add([shortcut,residual])


def _residual_block3d(block_function,filters,kernel_regularizer,repetitions,is_first_layer=False):
    def f(input):
        for i in range(repetitions):
            strides = (1,1,1)
            if i == 0 and not is_first_layer:
                strides = (2,2,2)
            input = block_function(filters=filters,strides=strides,kernel_regularizer=kernel_regularizer,is_first_block_of_first_layer=(
                                       is_first_layer and i == 0)
                                   )(input)
        return input

    return f


def basic_block(filters,strides=(1,1,1),kernel_regularizer=l2(1e-4),is_first_block_of_first_layer=False):
    """Basic 3 X 3 X 3 convolution blocks. Extended from raghakot's 2D impl."""
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv1 = Conv3D(filters=filters,kernel_size=(3,3,3),strides=strides,padding="same",kernel_initializer="he_normal",kernel_regularizer=kernel_regularizer
                           )(input)
        else:
            conv1 = _bn_relu_conv3d(filters=filters,kernel_size=(3,3,3),strides=strides,kernel_regularizer=kernel_regularizer
                                    )(input)

        residual = _bn_relu_conv3d(filters=filters,kernel_size=(3,3,3),kernel_regularizer=kernel_regularizer
                                   )(conv1)
        return _shortcut3d(input,residual)

    return f


def bottleneck(filters,strides=(1,1,1),kernel_regularizer=l2(1e-4),is_first_block_of_first_layer=False):
    """Basic 3 X 3 X 3 convolution blocks. Extended from raghakot's 2D impl."""
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv_1_1 = Conv3D(filters=filters,kernel_size=(1,1,1),strides=strides,padding="same",kernel_initializer="he_normal",kernel_regularizer=kernel_regularizer
                              )(input)
        else:
            conv_1_1 = _bn_relu_conv3d(filters=filters,kernel_size=(1,1,1),strides=strides,kernel_regularizer=kernel_regularizer
                                       )(input)

        conv_3_3 = _bn_relu_conv3d(filters=filters,kernel_size=(3,3,3),kernel_regularizer=kernel_regularizer
                                   )(conv_1_1)
        residual = _bn_relu_conv3d(filters=filters * 4,kernel_size=(1,1,1),kernel_regularizer=kernel_regularizer
                                   )(conv_3_3)

        return _shortcut3d(input,residual)

    return f


def _handle_data_format():
    global DIM1_AXIS
    global DIM2_AXIS
    global DIM3_AXIS
    global CHANNEL_AXIS
    if K.image_data_format() == 'channels_last':
        DIM1_AXIS = 1
        DIM2_AXIS = 2
        DIM3_AXIS = 3
        CHANNEL_AXIS = 4
    else:
        CHANNEL_AXIS = 1
        DIM1_AXIS = 2
        DIM2_AXIS = 3
        DIM3_AXIS = 4


def _get_block(identifier):
    if isinstance(identifier,six.string_types):
        res = globals().get(identifier)
        if not res:
            raise ValueError('Invalid {}'.format(identifier))
        return res
    return identifier


class resnet3DBuilder(object):
    """resnet3D."""

    @staticmethod
    def build(input_shape,num_outputs,block_fn,repetitions,reg_factor):
        """Instantiate a vanilla resnet3D keras model.
        # Arguments
            input_shape: Tuple of input shape in the format
            (conv_dim1,conv_dim2,conv_dim3,channels) if dim_ordering='tf'
            (channels,conv_dim1,conv_dim2,conv_dim3) if dim_ordering='th'
            num_outputs: The number of outputs at the final softmax layer
            block_fn: Unit block to use {'basic_block','bottleneck'}
            repetitions: Repetitions of unit blocks
        # Returns
            model: a 3D resnet model that takes a 5D tensor (volumetric images
            in batch) as input and returns a 1D vector (prediction) as output.
        """
        _handle_data_format()
        if len(input_shape) != 4:
            raise ValueError("Input shape should be a tuple "
                             "(conv_dim1,conv_dim2,conv_dim3,channels) "
                             "for tensorflow as backend or "
                             "(channels,conv_dim1,conv_dim2,conv_dim3) "
                             "for theano as backend")

        block_fn = _get_block(block_fn)
        input = Input(shape=input_shape)
        # first conv
        conv1 = _conv_bn_relu3D(filters=64,kernel_size=(7,7,7),strides=(2,2,2),kernel_regularizer=l2(reg_factor)
                                )(input)
        pool1 = MaxPooling3D(pool_size=(3,3,3),strides=(2,2,2),padding="same")(conv1)

        # repeat blocks
        block = pool1
        filters = 64
        for i,r in enumerate(repetitions):
            block = _residual_block3d(block_fn,filters=filters,kernel_regularizer=l2(reg_factor),repetitions=r,is_first_layer=(i == 0)
                                      )(block)
            filters *= 2

        # last activation
        block_output = _bn_relu(block)

        # average pool and classification
        pool2 = AveragePooling3D(pool_size=(block.shape[DIM1_AXIS],block.shape[DIM2_AXIS],block.shape[DIM3_AXIS]),strides=(1,1,1))(block_output)
        flatten1 = Flatten()(pool2)
        if num_outputs > 1:
            dense = Dense(units=num_outputs,activation="softmax",kernel_regularizer=l2(reg_factor))(flatten1)
        else:
            dense = Dense(units=num_outputs,activation="sigmoid",kernel_regularizer=l2(reg_factor))(flatten1)

        model = Model(inputs=input,outputs=dense)
        return model

    @staticmethod
    def build_resnet_18(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 18."""
        return resnet3DBuilder.build(input_shape,num_outputs,basic_block,[2,2,2,2],reg_factor=reg_factor)

    @staticmethod
    def build_resnet_34(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 34."""
        return resnet3DBuilder.build(input_shape,num_outputs,basic_block,[3,4,6,3],reg_factor=reg_factor)

    @staticmethod
    def build_resnet_50(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 50."""
        return resnet3DBuilder.build(input_shape,num_outputs,bottleneck,[3,4,6,3],reg_factor=reg_factor)

    @staticmethod
    def build_resnet_101(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 101."""
        return resnet3DBuilder.build(input_shape,num_outputs,bottleneck,[3,4,23,3],reg_factor=reg_factor)

    @staticmethod
    def build_resnet_152(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 152."""
        return resnet3DBuilder.build(input_shape,num_outputs,bottleneck,[3,8,36,3],reg_factor=reg_factor)

When I train the network on videos, I get the following error:

Error: unhashable type: 'Dimension'

  File "<ipython-input-29-788d091a6763>",line 1961,in main
    trained_model_name)
  File "<ipython-input-29-788d091a6763>",line 1805,in train
    model = train_load_model(model_type,training_condition,sample_input.shape,nb_classes)
  File "<ipython-input-29-788d091a6763>",line 1684,in train_load_model
    model = resnet3DBuilder.build_resnet_50((96,96,20)
  File "<ipython-input-29-788d091a6763>",line 1543,in build_resnet_50
    [3,reg_factor=reg_factor)
  File "<ipython-input-29-788d091a6763>",line 1501,in build
    )(block)
  File "<ipython-input-29-788d091a6763>",line 1372,in f
    )(input)
  File "<ipython-input-29-788d091a6763>",line 1419,line 1334,in f
    activation = _bn_relu(input)
  File "<ipython-input-29-788d091a6763>",line 1300,in _bn_relu
    norm = Batchnormalization(axis=CHANNEL_AXIS)(input)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py",line 440,in __call__
    self.assert_input_compatibility(inputs)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py",line 345,in assert_input_compatibility
    x_shape[int(axis)] not in {value,None}):

I have the following TensorFlow, Keras and Python versions:

  • TensorFlow: 1.15.0
  • Keras: 2.2.4
  • Python: 3.6

Can you tell me how to fix this error? I have seen that casting a Dimension to int solves the problem elsewhere, but I do not know what to cast here.

Solution

To fix the problem, every access to a tensor shape needs to be cast to int.

Example: residual.shape[CHANNEL_AXIS] needs to be rewritten as int(residual.shape[CHANNEL_AXIS]).
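Concretely, the corrected code below changes exactly two shape-derived layer arguments: the filters of the shortcut convolution in _shortcut3d, and the pool_size of the final AveragePooling3D in build. The before/after for those lines looks like this (excerpted from the code below, with unrelated arguments elided as ...):

# before: a raw shape entry (a tf.Dimension under TF 1.x) goes straight into the layer
shortcut = Conv3D(filters=residual.shape[CHANNEL_AXIS],...)(input)
pool2 = AveragePooling3D(pool_size=(block.shape[DIM1_AXIS],block.shape[DIM2_AXIS],block.shape[DIM3_AXIS]),...)(block_output)

# after: every shape access is wrapped in int()
shortcut = Conv3D(filters=int(residual.shape[CHANNEL_AXIS]),...)(input)
pool2 = AveragePooling3D(pool_size=(int(block.shape[DIM1_AXIS]),int(block.shape[DIM2_AXIS]),int(block.shape[DIM3_AXIS])),...)(block_output)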

The new version of the code is below:

## Resnet 3D architecture
# Taken from https://github.com/JihongJu/keras-resnet3d/blob/master/resnet3d/resnet3d.py
# Imports assumed by this snippet (following the upstream repo):
import math
import six
from keras import backend as K
from keras.models import Model
from keras.layers import (Input,Activation,Dense,Flatten,Conv3D,MaxPooling3D,AveragePooling3D,BatchNormalization,add)
from keras.regularizers import l2

def _bn_relu(input):
    """Helper to build a BN -> relu block (by @raghakot)."""
    norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
    return Activation("relu")(norm)

def _conv_bn_relu3D(**conv_params):
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides",(1,1,1))
    kernel_initializer = conv_params.setdefault(
        "kernel_initializer","he_normal")
    padding = conv_params.setdefault("padding","same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer",l2(1e-4))

    def f(input):
        conv = Conv3D(filters=filters,kernel_size=kernel_size,strides=strides,kernel_initializer=kernel_initializer,padding=padding,kernel_regularizer=kernel_regularizer)(input)
        return _bn_relu(conv)

    return f

def _bn_relu_conv3d(**conv_params):
    """Helper to build a BN -> relu -> conv3d block."""
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides",(1,1,1))
    kernel_initializer = conv_params.setdefault("kernel_initializer","he_normal")
    padding = conv_params.setdefault("padding","same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer",l2(1e-4))

    def f(input):
        activation = _bn_relu(input)
        return Conv3D(filters=filters,kernel_size=kernel_size,strides=strides,kernel_initializer=kernel_initializer,padding=padding,kernel_regularizer=kernel_regularizer)(activation)
    return f


def _shortcut3d(input,residual):
    """3D shortcut to match input and residual and merges them with "sum"."""
    stride_dim1 = math.ceil(int(input.shape[DIM1_AXIS]) \
        / int(residual.shape[DIM1_AXIS]))
    stride_dim2 = math.ceil(int(input.shape[DIM2_AXIS]) \
        / int(residual.shape[DIM2_AXIS]))
    stride_dim3 = math.ceil(int(input.shape[DIM3_AXIS]) \
        / int(residual.shape[DIM3_AXIS]))
    equal_channels = int(residual.shape[CHANNEL_AXIS]) \
        == int(input.shape[CHANNEL_AXIS])

    shortcut = input
    if stride_dim1 > 1 or stride_dim2 > 1 or stride_dim3 > 1 \
            or not equal_channels:
        shortcut = Conv3D(
            filters=int(residual.shape[CHANNEL_AXIS]),kernel_size=(1,1,1),strides=(stride_dim1,stride_dim2,stride_dim3),kernel_initializer="he_normal",padding="valid",kernel_regularizer=l2(1e-4)
            )(input)
    return add([shortcut,residual])


def _residual_block3d(block_function,filters,kernel_regularizer,repetitions,is_first_layer=False):
    def f(input):
        for i in range(repetitions):
            strides = (1,1,1)
            if i == 0 and not is_first_layer:
                strides = (2,2,2)
            input = block_function(filters=filters,strides=strides,kernel_regularizer=kernel_regularizer,is_first_block_of_first_layer=(
                                       is_first_layer and i == 0)
                                   )(input)
        return input

    return f


def basic_block(filters,strides=(1,1,1),kernel_regularizer=l2(1e-4),is_first_block_of_first_layer=False):
    """Basic 3 X 3 X 3 convolution blocks. Extended from raghakot's 2D impl."""
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv1 = Conv3D(filters=filters,kernel_size=(3,3,3),strides=strides,padding="same",kernel_initializer="he_normal",kernel_regularizer=kernel_regularizer
                           )(input)
        else:
            conv1 = _bn_relu_conv3d(filters=filters,kernel_size=(3,3,3),strides=strides,kernel_regularizer=kernel_regularizer
                                    )(input)

        residual = _bn_relu_conv3d(filters=filters,kernel_size=(3,3,3),kernel_regularizer=kernel_regularizer
                                   )(conv1)
        return _shortcut3d(input,residual)

    return f


def bottleneck(filters,strides=(1,1,1),kernel_regularizer=l2(1e-4),is_first_block_of_first_layer=False):
    """Basic 3 X 3 X 3 convolution blocks. Extended from raghakot's 2D impl."""
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv_1_1 = Conv3D(filters=filters,kernel_size=(1,1,1),strides=strides,padding="same",kernel_initializer="he_normal",kernel_regularizer=kernel_regularizer
                              )(input)
        else:
            conv_1_1 = _bn_relu_conv3d(filters=filters,kernel_size=(1,1,1),strides=strides,kernel_regularizer=kernel_regularizer
                                       )(input)

        conv_3_3 = _bn_relu_conv3d(filters=filters,kernel_size=(3,3,3),kernel_regularizer=kernel_regularizer
                                   )(conv_1_1)
        residual = _bn_relu_conv3d(filters=filters * 4,kernel_size=(1,1,1),kernel_regularizer=kernel_regularizer
                                   )(conv_3_3)

        return _shortcut3d(input,residual)

    return f


def _handle_data_format():
    global DIM1_AXIS
    global DIM2_AXIS
    global DIM3_AXIS
    global CHANNEL_AXIS
    if K.image_data_format() == 'channels_last':
        print("here CHANNELS last")
        DIM1_AXIS = 1
        DIM2_AXIS = 2
        DIM3_AXIS = 3
        CHANNEL_AXIS = 4
    else:
        CHANNEL_AXIS = 1
        DIM1_AXIS = 2
        DIM2_AXIS = 3
        DIM3_AXIS = 4


def _get_block(identifier):
    if isinstance(identifier,six.string_types):
        res = globals().get(identifier)
        if not res:
            raise ValueError('Invalid {}'.format(identifier))
        return res
    return identifier


class Resnet3DBuilder(object):
    """ResNet3D."""

    @staticmethod
    def build(input_shape,num_outputs,block_fn,repetitions,reg_factor):
        """Instantiate a vanilla ResNet3D keras model.
        # Arguments
            input_shape: Tuple of input shape in the format
            (conv_dim1,conv_dim2,conv_dim3,channels) if dim_ordering='tf'
            (channels,conv_dim1,conv_dim2,conv_dim3) if dim_ordering='th'
            num_outputs: The number of outputs at the final softmax layer
            block_fn: Unit block to use {'basic_block','bottleneck'}
            repetitions: Repetitions of unit blocks
        # Returns
            model: a 3D ResNet model that takes a 5D tensor (volumetric images
            in batch) as input and returns a 1D vector (prediction) as output.
        """
        _handle_data_format()
        if len(input_shape) != 4:
            raise ValueError("Input shape should be a tuple "
                             "(conv_dim1,conv_dim2,conv_dim3,channels) "
                             "for tensorflow as backend or "
                             "(channels,conv_dim1,conv_dim2,conv_dim3) "
                             "for theano as backend")

        block_fn = _get_block(block_fn)
        input = Input(shape=input_shape)
        # first conv
        conv1 = _conv_bn_relu3D(filters=64,kernel_size=(7,7,7),strides=(2,2,2),kernel_regularizer=l2(reg_factor)
                                )(input)
        pool1 = MaxPooling3D(pool_size=(3,3,3),strides=(2,2,2),padding="same")(conv1)

        # repeat blocks
        block = pool1
        filters = 64
        for i,r in enumerate(repetitions):
            block = _residual_block3d(block_fn,filters=filters,kernel_regularizer=l2(reg_factor),repetitions=r,is_first_layer=(i == 0)
                                      )(block)
            filters *= 2

        # last activation
        block_output = _bn_relu(block)

        # average pool and classification
        pool2 = AveragePooling3D(pool_size=(int(block.shape[DIM1_AXIS]),int(block.shape[DIM2_AXIS]),int(block.shape[DIM3_AXIS])),strides=(1,1,1))(block_output)
        flatten1 = Flatten()(pool2)
        if num_outputs > 1:
            dense = Dense(units=num_outputs,activation="softmax",kernel_regularizer=l2(reg_factor))(flatten1)
        else:
            dense = Dense(units=num_outputs,activation="sigmoid",kernel_regularizer=l2(reg_factor))(flatten1)

        model = Model(inputs=input,outputs=dense)
        return model

    @staticmethod
    def build_resnet_18(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 18."""
        return Resnet3DBuilder.build(input_shape,num_outputs,basic_block,[2,2,2,2],reg_factor=reg_factor)

    @staticmethod
    def build_resnet_34(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 34."""
        return Resnet3DBuilder.build(input_shape,num_outputs,basic_block,[3,4,6,3],reg_factor=reg_factor)

    @staticmethod
    def build_resnet_50(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 50."""
        return Resnet3DBuilder.build(input_shape,num_outputs,bottleneck,[3,4,6,3],reg_factor=reg_factor)

    @staticmethod
    def build_resnet_101(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 101."""
        return Resnet3DBuilder.build(input_shape,num_outputs,bottleneck,[3,4,23,3],reg_factor=reg_factor)

    @staticmethod
    def build_resnet_152(input_shape,num_outputs,reg_factor=1e-4):
        """Build resnet 152."""
        return Resnet3DBuilder.build(input_shape,num_outputs,bottleneck,[3,8,36,3],reg_factor=reg_factor)
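With the casts in place, the builder can be used as before. For example (the input shape, class count and compile settings below are purely illustrative, not taken from the question):

# Hypothetical usage: 96x96 frames, 20 frames per clip, 3 channels, 10 classes.
model = Resnet3DBuilder.build_resnet_50((96,96,20,3),num_outputs=10)
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["accuracy"])
model.summary()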

The latest versions of Keras and TensorFlow do not have this problem, but I need to keep the older versions of both libraries because my other scripts do not run on the latest TensorFlow/Keras.
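For what it's worth, the int() casts do not have to be reverted after an upgrade: int() applied to a value that is already a plain Python int is a no-op, so the same code should also run on newer TensorFlow/Keras, where indexing a shape already returns ints rather than Dimension objects. A quick sanity check (illustrative only, using tf.keras just to inspect the shape behaviour; the shape is arbitrary):

import tensorflow as tf

x = tf.keras.Input(shape=(96,96,20,64))   # 5D tensor once the batch axis is added
dim = x.shape[-1]                         # tf.Dimension under TF 1.x, a plain int under TF 2.x
print(type(dim),int(dim))                 # int(dim) is 64 in both cases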
