如何解决使用 Transformer 和 PL 的令牌分类只预测一个令牌
我正在学习如何将 Pytorch Learning 用于不同的 NLP 任务。 我尝试使用 PL API 实现我在 Kaggle 中找到的令牌分类示例,但是当我在两个时期后运行我的代码时,我的模型收敛到仅预测 O(其他,而非实体)类。与 Kaggle 中的示例不同,后者能够更好地学习如何预测不同的实体。 我在本地重新运行 Kaggle 示例,我能够得到类似的结果
我的模型缺少什么?看起来我的模型实现优化是为了学习其他东西。
这是我正在使用的代码:
class NERClassifier(pl.LightningModule):
def __init__(self,train_ds,val_ds,test_ds):
super().__init__()
self.train_ds,self.val_ds,self.test_ds = train_ds,test_ds
self.num_labels = self.train_ds.features['ner_tags'].feature.num_classes
self.model = transformers.RobertaForTokenClassification.from_pretrained(FLAGS.model,num_labels=self.num_labels)
self.loss = th.nn.CrossEntropyLoss(reduction='mean')
self.confusion_matrix = th.zeros(self.num_labels,self.num_labels)
def prepare_data(self):
tokenizer = transformers.RobertaTokenizer.from_pretrained(FLAGS.model)
def _prepare_ds(ds):
ds = ds.map(_tokenize)
ds.set_format(type='torch',columns=['input_ids','attention_mask','labels'])
return ds
def _tokenize(x):
encodings = tokenizer(x['tokens'],truncation=True,padding='max_length',is_split_into_words=True)
labels = x['ner_tags'] + [0] * (tokenizer.model_max_length - len(x['ner_tags']))
return {**encodings,'labels': labels}
def _prepare():
return map(_prepare_ds,[self.train_ds,self.test_ds])
def _update_model():
labels = self.train_ds.features['ner_tags'].feature
label2id = {k: labels.str2int(k) for k in labels.names}
id2label = {v: k for k,v in label2id.items()}
self.model.config.id2label = id2label
self.model.config.label2id = label2id
self.train_ds,self.test_ds = _prepare()
_update_model()
def train_dataloader(self):
return DataLoader(self.train_ds,sampler=RandomSampler(self.train_ds),batch_size=FLAGS.batch_size,pin_memory=True,num_workers=FLAGS.num_workers)
def val_dataloader(self):
return DataLoader(self.val_ds,sampler=RandomSampler(self.val_ds),num_workers=FLAGS.num_workers)
def test_dataloader(self):
return DataLoader(self.test_ds,sampler=RandomSampler(self.test_ds),num_workers=FLAGS.num_workers)
def configure_optimizers(self):
return th.optim.AdamW(self.parameters(),lr=FLAGS.lr,eps=FLAGS.eps)
def forward(self,batch,batch_idx):
loss,logits = self.model(**batch,return_dict=False)
return loss,logits
def training_step(self,batch_idx):
print('start training step')
loss,logits = self.forward(batch,batch_idx)
self.logger.experiment.add_scalar('train_loss',loss)
return {'loss': loss,'logits': logits}
def training_epoch_end(self,outputs):
print('training epoch end')
loss = th.mean(th.stack([o['loss'].float() for o in outputs]))
self.logger.experiment.add_scalar('epoc_train_loss',loss,self.current_epoch)
def validation_step(self,batch_idx):
print('start validation step')
loss,batch_idx)
labels_hat = th.argmax(logits,dim=2)
tags = batch['attention_mask'].sum(dim=1)
labels = batch['labels']
self._update_confusion_matrix(labels,labels_hat,tags)
self.logger.experiment.add_scalar('val_loss','logits': logits,'labels_hat': labels_hat}
def validation_epoch_end(self,outputs):
print('validation epoch end')
loss = th.mean(th.stack([o['loss'].float() for o in outputs]))
self.logger.experiment.add_scalar('epoc_val_loss',self.current_epoch)
def on_validation_epoch_end(self) -> None:
print('end validation epoch')
labels = self.model.config.id2label
labels = list(labels.values())
image_tensor = get_figure_from_cm(self.confusion_matrix,labels)
self.logger.experiment.add_figure(f'confusion matrix_{self.current_epoch}',image_tensor,self.current_epoch)
def on_validation_end(self):
print('on_validation_end!!!!!')
def _update_confusion_matrix(self,labels,tags):
for label,label_hat,tag in zip(labels,tags):
true_labels = label[:tag]
predicted_labels = label_hat[:tag]
for true,pred in zip(true_labels,predicted_labels):
self.confusion_matrix[true.item()][pred.item()] += 1
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。