GluonCV: fine-tuning a Faster R-CNN model


I am trying to fine-tune Faster R-CNN on a custom dataset, following the corresponding tutorial. As noted at the end of that tutorial, it is written for SSD models, so I tried to adapt it by pulling in the Faster R-CNN blocks from the train_faster_rcnn.py script.

The main difference from train_faster_rcnn.py is that I need to fine-tune on my own dataset, so I changed the dataset-loading code to read my own .rec files instead of downloading COCO, VOC, or the like. I also hard-coded the values I would otherwise pass as command-line arguments and pass them straight to the training function. For the rest I reused the code blocks from the original script. This is what I have now:

import time
import os
import logging
import mxnet as mx
from mxnet import autograd,gluon
import gluoncv as gcv
from mxboard import SummaryWriter
from gluoncv.data.batchify import FasterRCNNTrainBatchify,Tuple,Append
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultTrainTransform,\
    FasterRCNNDefaultValTransform
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.parallel import Parallelizable,Parallel
from gluoncv.utils.metrics.rcnn import RPNAccMetric,RPNL1LossMetric,RCNNAccMetric,\
    RCNNL1LossMetric


def main():
    ## try to use GPU for training
    # try:
    #   ctx = [mx.gpu(1)]
    # except:
    #   ctx = [mx.cpu()]

    ctx = [mx.cpu(0)]

    # network
    kwargs = {}
    module_list = []

    ## whether to use feature pyramid network
    use_fpn = False
    if use_fpn:
        module_list.append('fpn')

    for param in net.collect_params().values():
        if param._data is not None:
            continue
        param.initialize()
    net.collect_params().reset_ctx(ctx)

    # output log file
    log_file = open(f'{saved_weights_path}{project_name}_{model_name}_log_file.txt','w')
    log_file.write("Epoch".rjust(8))
    for class_name in classes:
        log_file.write(f"{class_name:>15}")
    log_file.write("Total".rjust(15))
    log_file.write("\n")
    # summary file for tensorboard
    sw = SummaryWriter(logdir=saved_weights_path+'logs/',flush_secs=30)

    # prepare data
    data_shape = 512
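    # note: data_shape is only needed by the (commented-out) COCODetectionMetric below;
    # the Faster R-CNN transforms use net.short / net.max_size instead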
    train_dataset = gcv.data.RecordFileDetection(f'custom_dataset/train_{project_name}.rec',coord_normalized=True)
    val_dataset  = gcv.data.RecordFileDetection(f'custom_dataset/test_{project_name}.rec',coord_normalized=True)
    eval_metric = VOC07MApMetric(iou_thresh=0.5,class_names=classes)
    # COCO metrics seem to work only on COCO dataset,while custom dataset is a RecordFileDetection file!
    # eval_metric = COCODetectionMetric(val_dataset,'_eval',data_shape=(data_shape,data_shape))

    # create the train/val data loaders from the record-file datasets
    train_data, val_data = get_dataloader(net, train_dataset, val_dataset, FasterRCNNDefaultTrainTransform,
                                          FasterRCNNDefaultValTransform, batch_size, len(ctx), use_fpn,
                                          num_workers=0)
    print(f"train dataloader -> {len(train_data)}")
    print(f"test dataloader -> {len(val_data)}")

    # training
    train(net,model_name,train_data,val_data,eval_metric,ctx,lr=0.001,wd=0.0005,momentum=0.9,lr_decay=0.1,lr_decay_epoch='',lr_warmup=1000,lr_warmup_factor=1. / 3.,start_epoch=0,epochs=100,log_interval=100,val_interval=1)




def get_dataloader(net, train_dataset, val_dataset, train_transform, val_transform, batch_size,
                   num_shards, use_fpn, num_workers):
    """Get dataloader."""
    train_bfn = FasterRCNNTrainBatchify(net,num_shards)
    if hasattr(train_dataset,'get_im_aspect_ratio'):
        im_aspect_ratio = train_dataset.get_im_aspect_ratio()
    else:
        im_aspect_ratio = [1.] * len(train_dataset)
    train_sampler = \
        gcv.nn.sampler.SplitSortedBucketSampler(im_aspect_ratio, batch_size, num_parts=1, part_index=0,
                                                shuffle=True)
    train_loader = mx.gluon.data.DataLoader(train_dataset.transform(
        train_transform(net.short, net.max_size, net, ashape=net.ashape, multi_stage=use_fpn)),
        batch_sampler=train_sampler, batchify_fn=train_bfn, num_workers=num_workers)
    val_bfn = Tuple(*[Append() for _ in range(3)])
    short = net.short[-1] if isinstance(net.short,(tuple,list)) else net.short
    # validation use 1 sample per device
    val_loader = mx.gluon.data.DataLoader(
        val_dataset.transform(val_transform(short, net.max_size)), num_shards, False,
        batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
    return train_loader,val_loader


class ForwardBackwardTask(Parallelizable):
    def __init__(self, net, optimizer, rpn_cls_loss, rpn_box_loss, rcnn_cls_loss, rcnn_box_loss, mix_ratio):
        super(ForwardBackwardTask,self).__init__()
        self.net = net
        self._optimizer = optimizer
        self.rpn_cls_loss = rpn_cls_loss
        self.rpn_box_loss = rpn_box_loss
        self.rcnn_cls_loss = rcnn_cls_loss
        self.rcnn_box_loss = rcnn_box_loss
        self.mix_ratio = mix_ratio

    def forward_backward(self,x):
        data,label,rpn_cls_targets,rpn_box_targets,rpn_box_masks = x
        with autograd.record():
            gt_label = label[:,:,4:5]
            gt_box = label[:, :, :4]
            cls_pred,box_pred,roi,samples,matches,rpn_score,rpn_box,anchors,cls_targets,\
            box_targets,box_masks,_ = net(data,gt_box,gt_label)
            # losses of rpn
            rpn_score = rpn_score.squeeze(axis=-1)
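            # anchors whose target is -1 are ignored; normalize the RPN losses by the number that actually contribute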
            num_rpn_pos = (rpn_cls_targets >= 0).sum()
            rpn_loss1 = self.rpn_cls_loss(rpn_score, rpn_cls_targets,
                                          rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
            rpn_loss2 = self.rpn_box_loss(rpn_box, rpn_box_targets,
                                          rpn_box_masks) * rpn_box.size / num_rpn_pos
            # rpn overall loss,use sum rather than average
            rpn_loss = rpn_loss1 + rpn_loss2
            # losses of rcnn
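            # proposals whose class target is -1 are ignored; normalize the RCNN losses the same way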
            num_rcnn_pos = (cls_targets >= 0).sum()
            rcnn_loss1 = self.rcnn_cls_loss(cls_pred, cls_targets,
                                            cls_targets.expand_dims(-1) >= 0) * cls_targets.size / num_rcnn_pos
            rcnn_loss2 = self.rcnn_box_loss(box_pred,box_targets,box_masks) * box_pred.size / \
                         num_rcnn_pos
            rcnn_loss = rcnn_loss1 + rcnn_loss2
            # overall losses
            total_loss = rpn_loss.sum() * self.mix_ratio + rcnn_loss.sum() * self.mix_ratio

            rpn_loss1_metric = rpn_loss1.mean() * self.mix_ratio
            rpn_loss2_metric = rpn_loss2.mean() * self.mix_ratio
            rcnn_loss1_metric = rcnn_loss1.mean() * self.mix_ratio
            rcnn_loss2_metric = rcnn_loss2.mean() * self.mix_ratio
            rpn_acc_metric = [[rpn_cls_targets,rpn_cls_targets >= 0],[rpn_score]]
            rpn_l1_loss_metric = [[rpn_box_targets,rpn_box_masks],[rpn_box]]
            rcnn_acc_metric = [[cls_targets],[cls_pred]]
            rcnn_l1_loss_metric = [[box_targets,box_masks],[box_pred]]

            total_loss.backward()

        return rpn_loss1_metric,rpn_loss2_metric,rcnn_loss1_metric,rcnn_loss2_metric,\
               rpn_acc_metric,rpn_l1_loss_metric,rcnn_acc_metric,rcnn_l1_loss_metric



def train(net, model_name, train_data, val_data, eval_metric, ctx, lr, wd, momentum, lr_decay,
          lr_decay_epoch, lr_warmup, lr_warmup_factor, start_epoch, epochs, log_interval, val_interval):
    """Training pipeline"""
    kv_store = 'local'
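    # freeze every parameter first, then re-enable gradients only for the parameters collect_train_params() marks as trainable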
    net.collect_params().setattr('grad_req','null')
    net.collect_train_params().setattr('grad_req','write')
    optimizer_params = {'learning_rate': lr,'wd': wd,'momentum': momentum}
    trainer = gluon.Trainer(
        net.collect_train_params(),# fix batchnorm,fix first stage,etc...
        'sgd',optimizer_params,update_on_kvstore=None,kvstore=kv_store)


    # lr decay policy
    lr_decay = float(lr_decay)
    lr_steps = sorted([float(ls) for ls in lr_decay_epoch.split(',') if ls.strip()])
    lr_warmup = float(lr_warmup)  # avoid int division

    # TODO(zhreshold) losses?
    rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
    rpn_box_loss = mx.gluon.loss.HuberLoss(rho=1 / 9.)  # == smoothl1
    rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
    rcnn_box_loss = mx.gluon.loss.HuberLoss()  # == smoothl1
    metrics = [mx.metric.Loss('RPN_Conf'),mx.metric.Loss('RPN_SmoothL1'),mx.metric.Loss('RCNN_CrossEntropy'),mx.metric.Loss('RCNN_SmoothL1'),]

    rpn_acc_metric = RPNAccMetric()
    rpn_bbox_metric = RPNL1LossMetric()
    rcnn_acc_metric = RCNNAccMetric()
    rcnn_bbox_metric = RCNNL1LossMetric()
    metrics2 = [rpn_acc_metric, rpn_bbox_metric, rcnn_acc_metric, rcnn_bbox_metric]

    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_file_path = model_name + '_train.log'
    log_dir = os.path.dirname(log_file_path)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    fh = logging.FileHandler(log_file_path)
    logger.addHandler(fh)
    logger.info('Start training from [Epoch {}]'.format(start_epoch))
    best_map = [0]
    for epoch in range(start_epoch,epochs):
        mix_ratio = 1.0
        rcnn_task = ForwardBackwardTask(net, trainer, rpn_cls_loss, rpn_box_loss, rcnn_cls_loss,
                                        rcnn_box_loss, mix_ratio=1.0)
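        # the Parallel executor runs forward_backward for each shard pushed via put(); results are fetched with get() below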
        executor = Parallel(1,rcnn_task)
        while lr_steps and epoch >= lr_steps[0]:
            new_lr = trainer.learning_rate * lr_decay
            lr_steps.pop(0)
            trainer.set_learning_rate(new_lr)
            logger.info("[Epoch {}] Set learning rate to {}".format(epoch,new_lr))
        for metric in metrics:
            metric.reset()
        tic = time.time()
        btic = time.time()
        base_lr = trainer.learning_rate
        rcnn_task.mix_ratio = mix_ratio
        for i,batch in enumerate(train_data):
            if epoch == 0 and i <= lr_warmup:
                # adjust based on real percentage
                new_lr = base_lr * get_lr_at_iter(i / lr_warmup,lr_warmup_factor)
                if new_lr != trainer.learning_rate:
                    if i % log_interval == 0:
                        logger.info(
                            '[Epoch 0 Iteration {}] Set learning rate to {}'.format(i,new_lr))
                    trainer.set_learning_rate(new_lr)
            batch = split_and_load(batch,ctx_list=ctx)
            metric_losses = [[] for _ in metrics]
            add_losses = [[] for _ in metrics2]
            if executor is not None:
                for data in zip(*batch):
                    executor.put(data)
            for j in range(len(ctx)):
                if executor is not None:
                    result = executor.get()
                else:
                    result = rcnn_task.forward_backward(list(zip(*batch))[0])
                for k in range(len(metric_losses)):
                    metric_losses[k].append(result[k])
                for k in range(len(add_losses)):
                    add_losses[k].append(result[len(metric_losses) + k])
            for metric,record in zip(metrics,metric_losses):
                metric.update(0,record)
            for metric,records in zip(metrics2,add_losses):
                for pred in records:
                    metric.update(pred[0],pred[1])
            trainer.step(batch_size)

            # update metrics
            if log_interval and not (i + 1) % log_interval:
                msg = ','.join(
                    ['{}={:.3f}'.format(*metric.get()) for metric in metrics + metrics2])
                logger.info('[Epoch {}][Batch {}],Speed: {:.3f} samples/sec,{}'.format(
                    epoch,i,log_interval * batch_size / (time.time() - btic),msg))
                btic = time.time()

        msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics])
        logger.info('[Epoch {}] Training cost: {:.3f},{}'.format(
            epoch,(time.time() - tic),msg))
        if not (epoch + 1) % val_interval:
            # consider reduce the frequency of validation to save time
            map_name, mean_ap = validate(net, val_data, ctx, eval_metric)
            val_msg = '\n'.join(['{}={}'.format(k,v) for k,v in zip(map_name,mean_ap)])
            logger.info('[Epoch {}] Validation: \n{}'.format(epoch,val_msg))
            current_map = float(mean_ap[-1])
        else:
            current_map = 0.
        save_params(net,logger,best_map,current_map,epoch,1,model_name)


def save_params(net, logger, best_map, current_map, epoch, save_interval, prefix):
    current_map = float(current_map)
    if current_map > best_map[0]:
        logger.info('[Epoch {}] mAP {} higher than current best {} saving to {}'.format(
            epoch, current_map, best_map, '{:s}_best.params'.format(prefix)))
        best_map[0] = current_map
        net.save_parameters('{:s}_best.params'.format(prefix))
        with open(prefix + '_best_map.log','a') as f:
            f.write('{:04d}:\t{:.4f}\n'.format(epoch,current_map))
    if save_interval and (epoch + 1) % save_interval == 0:
        logger.info('[Epoch {}] Saving parameters to {}'.format(
            epoch, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)))
        net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))


def split_and_load(batch,ctx_list):
    """Split data to 1 batch each device."""
    new_batch = []
    for i,data in enumerate(batch):
        if isinstance(data,(list,tuple)):
            new_data = [x.as_in_context(ctx) for x,ctx in zip(data,ctx_list)]
        else:
            new_data = [data.as_in_context(ctx_list[0])]
        new_batch.append(new_data)
    return new_batch


def validate(net, val_data, ctx, eval_metric):
    """Test on validation dataset."""
    clipper = gcv.nn.bbox.BBoxClipToImage()
    eval_metric.reset()
    net.hybridize(static_alloc=False)
    for batch in val_data:
        batch = split_and_load(batch,ctx_list=ctx)
        det_bboxes = []
        det_ids = []
        det_scores = []
        gt_bboxes = []
        gt_ids = []
        gt_difficults = []
        for x,y,im_scale in zip(*batch):
            # get prediction results
            ids,scores,bboxes = net(x)
            det_ids.append(ids)
            det_scores.append(scores)
            # clip to image size
            det_bboxes.append(clipper(bboxes,x))
            # rescale to original resolution
            im_scale = im_scale.reshape((-1)).asscalar()
            det_bboxes[-1] *= im_scale
            # split ground truths
            gt_ids.append(y.slice_axis(axis=-1,begin=4,end=5))
            gt_bboxes.append(y.slice_axis(axis=-1,begin=0,end=4))
            gt_bboxes[-1] *= im_scale
            gt_difficults.append(y.slice_axis(axis=-1,begin=5,end=6) if y.shape[-1] > 5 else None)

        # update metric
        for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(
                det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
            eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
    return eval_metric.get()


def get_lr_at_iter(alpha,lr_warmup_factor=1. / 3.):
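    # linear warm-up multiplier: lr_warmup_factor at alpha=0, ramping up to 1.0 at alpha=1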
    return lr_warmup_factor * (1 - alpha) + alpha



if __name__ == "__main__":
    # prepare model
    model_name = "faster_rcnn_resnet50_v1b_coco"
    ## this will be used to automatically determine input and output file names
    project_name = "natak_all"
    classes = ['ball','bb_ball','drum','guitar','koshi_bell','massager','ring','snake','tinsel']
    batch_size = 8
    # pre-trained model,reset network to predict new class
    net = gcv.model_zoo.get_model(model_name,pretrained=True)
    # net = gcv.model_zoo.get_model(model_name,classes=classes,pretrained=False,transfer='coco')
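    # reset_class keeps the pretrained backbone/RPN weights and re-initializes only the class-specific prediction heads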
    net.reset_class(classes)
    # folder where trained model will be saved
    saved_weights_path = f"saved_weights/{project_name}_{model_name}/"
    if not os.path.exists(saved_weights_path):
        os.makedirs(saved_weights_path)

    main()

The problem is that when I try to run it, I get this error:

mxnet.base.MXNetError: MXNetError: Shape inconsistent, Provided = [1,128], inferred shape=[8,128]

So it looks like I am not loading the data correctly. Any suggestions?
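In case it helps, this is the kind of quick check I can run to see what the data loading actually produces. It only prints shapes; the dataset path and the train_data loader are the same ones built in the script above, and the per-row label layout of [xmin, ymin, xmax, ymax, class_id] is my assumption about what RecordFileDetection returns.

import gluoncv as gcv

dataset = gcv.data.RecordFileDetection('custom_dataset/train_natak_all.rec', coord_normalized=True)
img, label = dataset[0]
print(img.shape)    # raw image, (H, W, 3)
print(label.shape)  # (num_objects, 5): xmin, ymin, xmax, ymax, class_id (assumed layout)

# shapes of the first batch produced by the train dataloader built in main()
batch = next(iter(train_data))
for i, part in enumerate(batch):
    shapes = [p.shape for p in part] if isinstance(part, (list, tuple)) else part.shape
    print(i, shapes)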
