
Optimizing Hyperparameters with Keras Tuner


I wrote the code below and now want to optimize its hyperparameters with Ray Tune, but I don't know how to go about it. I want to use Population Based Training (PBT), and the hyperparameter search space I have in mind is:

{
  "per_gpu_batch_size": [16, 32, 64],
  "weight_decay": (0, 0.3),
  "learning_rate": (1e-5, 5e-5),
  "num_epochs": [2, 3, 4, 5]
}
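
For reference, this search space would typically be expressed with Ray Tune's sampling primitives: tune.choice for the discrete lists and tune.uniform for the continuous ranges. A minimal sketch, assuming the standard ray.tune API (the name config is illustrative):

from ray import tune

config = {
    "per_gpu_batch_size": tune.choice([16, 32, 64]),  # pick one of the listed sizes
    "weight_decay": tune.uniform(0.0, 0.3),           # sample from the continuous range
    "learning_rate": tune.uniform(1e-5, 5e-5),
    "num_epochs": tune.choice([2, 3, 4, 5]),
}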

The implementation code is as follows:

import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from transformers import BertTokenizer, TFBertForSequenceClassification
 
# Human-written training texts
filename1 = '/content/drive/My Drive/رونوشت webtext.train.csv'
dr1 = pd.read_csv(filename1)
dr1 = shuffle(dr1).reset_index(drop=True)   # shuffle the rows once
train_webtext = pd.DataFrame(dr1, columns=["text"])
train_webtext = train_webtext.loc[:2499]    # keep the first 2500 rows

# Model-generated training texts (xl-1542M-k40)
filename2 = '/content/drive/My Drive/رونوشت xl-1542M-k40.train.csv'
dr2 = pd.read_csv(filename2)
dr2 = shuffle(dr2).reset_index(drop=True)
train_gen = pd.DataFrame(dr2, columns=["text"])
train_gen = train_gen.loc[:2499]
# Label 0 = human-written, 1 = generated
labels1 = [0]*len(train_webtext) + [1]*len(train_gen)


# Validation sets (not shuffled)
filename3 = '/content/drive/My Drive/رونوشت webtext.valid.csv'
dr3 = pd.read_csv(filename3)
valid_webtext = pd.DataFrame(dr3, columns=["text"])

filename4 = '/content/drive/My Drive/رونوشت xl-1542M-k40.valid.csv'
dr4 = pd.read_csv(filename4)
valid_gen = pd.DataFrame(dr4, columns=["text"])
labels2 = [0]*len(valid_webtext) + [1]*len(valid_gen)

data = pd.concat([train_webtext, train_gen, valid_webtext, valid_gen])

sentences = data['text']
labels = labels1 + labels2
print(len(sentences), len(labels))
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert_model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)

input_ids = []
attention_masks = []

# Tokenize every sentence, padding/truncating to a fixed length of 64 tokens
for sent in sentences:
    bert_inp = bert_tokenizer.encode_plus(sent,
                                          add_special_tokens=True,
                                          max_length=64,
                                          padding='max_length',
                                          truncation=True,
                                          return_attention_mask=True)
    input_ids.append(bert_inp['input_ids'])
    attention_masks.append(bert_inp['attention_mask'])
        

input_ids=np.asarray(input_ids)
attention_masks=np.array(attention_masks)
labels=np.array(labels)

print('Preparing the pickle files...')

pickle_inp_path = '/content/drive/MyDrive/ber_inp_w5000.pkl'
pickle_mask_path = '/content/drive/MyDrive/ber_mask_w5000.pkl'
pickle_label_path = '/content/drive/MyDrive/ber_label_w5000.pkl'

# Save the encoded inputs so tokenization does not have to be repeated
with open(pickle_inp_path, 'wb') as f:
    pickle.dump(input_ids, f)
with open(pickle_mask_path, 'wb') as f:
    pickle.dump(attention_masks, f)
with open(pickle_label_path, 'wb') as f:
    pickle.dump(labels, f)

print('Pickle files saved as', pickle_inp_path, pickle_mask_path, pickle_label_path)


print('Loading the saved pickle files..')

with open(pickle_inp_path, 'rb') as f:
    input_ids = pickle.load(f)
with open(pickle_mask_path, 'rb') as f:
    attention_masks = pickle.load(f)
with open(pickle_label_path, 'rb') as f:
    labels = pickle.load(f)

print('Input shape {} Attention mask shape {} Input label shape {}'.format(input_ids.shape,attention_masks.shape,labels.shape))
# Train/validation split: two thirds of the data go to validation
train_inp, val_inp, train_label, val_label, train_mask, val_mask = train_test_split(
    input_ids, labels, attention_masks, test_size=2/3)
print('Train inp shape {} Val input shape {}\nTrain label shape {} Val label shape {}\nTrain attention mask shape {} Val attention mask shape {}'.format(
    train_inp.shape, val_inp.shape, train_label.shape, val_label.shape, train_mask.shape, val_mask.shape))
print('\nBert Model')
bert_model.summary()

log_dir = 'tensorboard_data/tb_bert'
model_save_path = './models/bert_model.h5'

# Save the best weights (by validation loss) and log to TensorBoard
callbacks = [
    tf.keras.callbacks.ModelCheckpoint(filepath=model_save_path,
                                       save_weights_only=True,
                                       monitor='val_loss',
                                       mode='min',
                                       save_best_only=True),
    keras.callbacks.TensorBoard(log_dir=log_dir),
]
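# Editorial note (hypothetical, assuming Ray 1.x's Keras integration): once the
# script runs under Ray Tune, a reporting callback could be appended here so
# that each epoch's validation metrics reach the scheduler. It is only valid
# inside a Tune session, hence shown commented out:
#   from ray.tune.integration.keras import TuneReportCallback
#   callbacks.append(TuneReportCallback({"val_accuracy": "val_accuracy"}))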

# Standard fine-tuning setup for sequence classification
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5, epsilon=1e-08)

bert_model.compile(loss=loss, optimizer=optimizer, metrics=[metric])

history = bert_model.fit([train_inp, train_mask], train_label,
                         batch_size=64,
                         epochs=5,
                         validation_data=([val_inp, val_mask], val_label),
                         callbacks=callbacks)
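
For concreteness, here is a rough, untested sketch of how this training loop could be wrapped for Ray Tune's Population Based Training (assuming the classic ray.tune tune.run API; the function name train_bert, the population size of 4, and the reuse of train_inp/train_mask/train_label/val_inp/val_mask/val_label from above are illustrative choices, and a real PBT run would additionally need checkpointing so that trials can clone each other):

import tensorflow as tf
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
from transformers import TFBertForSequenceClassification

def train_bert(config):
    # Build a fresh model for each trial.
    model = TFBertForSequenceClassification.from_pretrained(
        'bert-base-uncased', num_labels=2)
    # Note: plain Adam ignores weight decay; an AdamW-style optimizer would be
    # needed for config["weight_decay"] to actually take effect.
    optimizer = tf.keras.optimizers.Adam(
        learning_rate=config["learning_rate"], epsilon=1e-08)
    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=optimizer,
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy('accuracy')])
    # Train one epoch per Tune iteration so PBT can perturb between epochs.
    for _ in range(config["num_epochs"]):
        history = model.fit([train_inp, train_mask], train_label,
                            batch_size=config["per_gpu_batch_size"],
                            epochs=1,
                            validation_data=([val_inp, val_mask], val_label),
                            verbose=0)
        # Report this epoch's validation accuracy back to Tune.
        tune.report(val_accuracy=history.history['val_accuracy'][-1])

pbt = PopulationBasedTraining(
    time_attr="training_iteration",
    perturbation_interval=1,
    hyperparam_mutations={
        # PBT resamples/perturbs these while training runs.
        "learning_rate": tune.uniform(1e-5, 5e-5),
        "weight_decay": tune.uniform(0.0, 0.3),
        "per_gpu_batch_size": [16, 32, 64],
    })

analysis = tune.run(
    train_bert,
    metric="val_accuracy",
    mode="max",
    scheduler=pbt,
    num_samples=4,                    # population size
    resources_per_trial={"gpu": 1},
    config={
        "per_gpu_batch_size": tune.choice([16, 32, 64]),
        "weight_decay": tune.uniform(0.0, 0.3),
        "learning_rate": tune.uniform(1e-5, 5e-5),
        "num_epochs": tune.choice([2, 3, 4, 5]),
    })
print('Best config:', analysis.best_config)

In this sketch the data arrays are assumed reachable from the trainable (module scope as in the script above, or passed in explicitly via tune.with_parameters).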

Thanks in advance for your guidance.
